{
struct mm_struct * mm = current->mm;
__u32 entry_1, entry_2, *lp;
- unsigned long phys_lp;
+ unsigned long mach_lp;
int error;
struct modify_ldt_ldt_s ldt_info;
}
lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
- phys_lp = arbitrary_virt_to_phys(lp);
+ mach_lp = arbitrary_virt_to_machine(lp);
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
/* Install the new entry ... */
install:
- error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
+ error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
out_unlock:
up(&mm->context.sem);
}
}
-static inline unsigned long arbitrary_virt_to_phys(void *va)
+static inline unsigned long arbitrary_virt_to_machine(void *va)
{
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
{
struct mm_struct * mm = current->mm;
__u32 entry_1, entry_2, *lp;
- unsigned long phys_lp;
+ unsigned long mach_lp;
int error;
struct user_desc ldt_info;
}
lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
- phys_lp = arbitrary_virt_to_phys(lp);
+ mach_lp = arbitrary_virt_to_machine(lp);
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
/* Install the new entry ... */
install:
- error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
+ error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
out_unlock:
up(&mm->context.sem);
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
- code_resource.start = virt_to_phys(_text);
- code_resource.end = virt_to_phys(_etext)-1;
- data_resource.start = virt_to_phys(_etext);
- data_resource.end = virt_to_phys(_edata)-1;
+ /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ /*code_resource.start = virt_to_phys(_text);*/
+ /*code_resource.end = virt_to_phys(_etext)-1;*/
+ /*data_resource.start = virt_to_phys(_etext);*/
+ /*data_resource.end = virt_to_phys(_edata)-1;*/
parse_cmdline_early(cmdline_p);
void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned int max_dma, high, low;
+ unsigned int /*max_dma,*/ high, low;
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ /*
+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
+ * We simply put all RAM in the DMA zone so that those drivers which
+ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
+ * Those drivers that *do* require lowmem are screwed anyway when
+ * running over Xen!
+ */
+ /*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
low = max_low_pfn;
high = highend_pfn;
- if (low < max_dma)
+ /*if (low < max_dma)*/
zones_size[ZONE_DMA] = low;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = low - max_dma;
+ /*else*/ {
+ /*zones_size[ZONE_DMA] = max_dma;*/
+ /*zones_size[ZONE_NORMAL] = low - max_dma;*/
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = high - low;
#endif
#else
+/*
+ * Is @address within a RAM page that is local to this virtual machine (i.e.,
+ * not an I/O page; not a RAM page belonging to another VM). See the comment
+ * that accompanies pte_pfn() in pgtable-2level.h to understand why this works.
+ */
+static inline int is_local_ram(unsigned long address)
+{
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ /*
+ * A machine frame is ours iff its pseudo-physical frame is in range
+ * and the phys-to-machine mapping round-trips back to the same mfn.
+ */
+ if (pfn < max_mapnr) {
+ if (pfn_to_mfn(pfn) == mfn)
+ return 1; /* local ram */
+ printk("is_local_ram: ioremapping foreign ram (a bad idea).\n");
+ }
+ return 0; /* i/o memory or foreign ram */
+}
+
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
{
if (!size || last_addr < phys_addr)
return NULL;
- if (phys_addr >= 0x0 && last_addr < 0x100000)
- return isa_bus_to_virt(phys_addr);
-
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr < 0x100000)
- return (void __iomem *) phys_to_virt(phys_addr);
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- if (machine_to_phys(phys_addr) < virt_to_phys(high_memory)) {
+ if (is_local_ram(phys_addr)) {
char *t_addr, *t_end;
struct page *page;
/* Guaranteed to be > phys_addr, as per __ioremap() */
last_addr = phys_addr + size - 1;
- if (machine_to_phys(last_addr) < virt_to_phys(high_memory)) {
+ if (is_local_ram(last_addr)) {
struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
unsigned long npages;
return;
}
- if (p->flags && machine_to_phys(p->phys_addr) < virt_to_phys(high_memory)) {
+ if (p->flags && is_local_ram(p->phys_addr)) {
change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
p->size >> PAGE_SHIFT,
PAGE_KERNEL);
if (!size || last_addr < phys_addr)
return NULL;
- if (phys_addr >= 0x0 && last_addr < 0x100000)
- return isa_bus_to_virt(phys_addr);
-
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr < 0x100000)
- return phys_to_virt(phys_addr);
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
/*
* Mappings have to be page-aligned
clear_page(pte);
make_page_readonly(pte);
- queue_pte_pin(virt_to_phys(pte));
+ queue_pte_pin(__pa(pte));
flush_page_update_queue();
}
struct page *page = virt_to_page(pte);
ClearPageForeign(page);
- queue_pte_unpin(virt_to_phys(pte));
+ queue_pte_unpin(__pa(pte));
make_page_writable(pte);
flush_page_update_queue();
}
*
* Allow them on x86 for legacy drivers, though.
*/
-#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
-#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
/*
* readX/writeX() are used to access memory mapped devices. On some
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
-static inline unsigned long arbitrary_virt_to_phys(void *va)
+static inline unsigned long arbitrary_virt_to_machine(void *va)
{
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);